In [ ]:
##################################################################################################
##   Notebook used for extracting text from html files. Some basic preprocessing tasks 
##   v3.0 Unsupervised classification using gensim
##   Required packages: os, logging, collections, gensim
##################################################################################################

In [ ]:
## Sanity check: confirm which Python version the notebook kernel is
## running (the notebook mixes print styles, so this matters below).
import platform
platform.python_version()

In [ ]:
## Configure logging so gensim emits INFO-level progress messages while
## training — useful for debugging the model-building cells below.
##
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

In [ ]:
## Tiny raw corpus (9 short documents) used to train an unsupervised
## gensim model.
##
raw_corpus = [
    "Human machine interface for lab abc computer applications",
    "A survey of user opinion of computer system response time",
    "The EPS user interface management system",
    "System and human system engineering testing of EPS",
    "Relation of user perceived response time to error measurement",
    "The generation of random binary unordered trees",
    "The intersection graph of paths in trees",
    "Graph minors IV Widths of trees and well quasi ordering",
    "Graph minors A survey",
]

In [ ]:
# Rudimentary stop-word list. Kindly note that in our use case removing
# stop words may create problems, so the list is deliberately small.
stoplist = {'for', 'a', 'of', 'the', 'and', 'to', 'in'}

In [ ]:
# Lowercase each document, tokenize on whitespace, and drop stop words
texts = []
for document in raw_corpus:
    tokens = [t for t in document.lower().split() if t not in stoplist]
    texts.append(tokens)
texts

In [ ]:
# Count word frequencies across the whole tokenized corpus.
# collections.Counter is the idiomatic stdlib tool for tallying; like the
# defaultdict(int) it replaces, looking up a missing token yields 0, so
# the `frequency[token] > 1` filter in the next cell is unaffected.
from collections import Counter
frequency = Counter(token for text in texts for token in text)
frequency

In [ ]:
# Keep only tokens that occur more than once anywhere in the corpus
processed_corpus = []
for text in texts:
    processed_corpus.append([token for token in text if frequency[token] > 1])
processed_corpus
Associate each word in the corpus with a unique integer ID. Since the corpus is small, the resulting dictionary will be small as well.

In [ ]:
from gensim import corpora

# Map each unique token in the processed corpus to an integer id
dictionary = corpora.Dictionary(processed_corpus)
print(dictionary)

## Persist the dictionary to disk with save() so it can be reloaded later
dictionary.save('deerwester.dict')

In [ ]:
# Simply enumerating through the dictionary.
# Fixed: the original used a Python-2 `print i` statement, which is a
# SyntaxError under Python 3 (the rest of the notebook uses print()).
# NOTE(review): iterating a gensim Dictionary yields token ids, so each
# printed value is an (index, token_id) pair — confirm that is intended
# rather than the token2id mapping shown in the next cell.
for i in enumerate(dictionary):
    print(i)

In [ ]:
# A clearer way to inspect the vocabulary: the full token -> id mapping
print(dictionary.token2id)

In [ ]:
## Vectorize a previously unseen document with the trained dictionary.
## Tokens absent from the dictionary are silently dropped by doc2bow.
##
new_doc = 'Human computer interaction'
new_tokens = new_doc.lower().split()
new_vec = dictionary.doc2bow(new_tokens)
new_vec

In [ ]:
# Convert the entire processed corpus into a list of bag-of-words vectors
bow_corpus = [dictionary.doc2bow(text) for text in processed_corpus]
# NOTE(review): this bare expression is not the cell's last statement, so
# it produces no notebook output — likely a leftover display line.
bow_corpus
# Model persistence: serialize the vectors to disk (Matrix Market format) for later use
corpora.MmCorpus.serialize('deerwester.mm', bow_corpus)
In gensim, documents are represented as vectors, so a model can be thought of as a transformation between two vector spaces. The details of this transformation are learned from the training corpus.

In [ ]:
from gensim import models
# Train a tf-idf model on the bag-of-words corpus
tfidf = models.TfidfModel(bow_corpus)
Check the difference in the tf-idf tuples when we use 'minors' vs 'minor'. Note that 'minor' is not present in the training data, while 'minors' is.

In [ ]:
# Transform the "system minors" string into tf-idf space.
# Fixed: Python-2 print statement -> print() function (SyntaxError in py3).
print(tfidf[dictionary.doc2bow("system minors".lower().split())])

In [ ]:
## Check what happens when we use 'minor' instead of 'minors' —
## a word the model has not seen during training.
## Fixed: Python-2 print statement -> print() function (SyntaxError in py3).
print(tfidf[dictionary.doc2bow("system minor".lower().split())])
Note how the word 'minor' has been rejected by the model and is not vectorized.

In [ ]:
## Apply the tf-idf transformation to every document in the corpus and
## print each resulting sparse vector.
corpus_tfidf = tfidf[bow_corpus]
for vector in corpus_tfidf:
    print(vector)

In [ ]:
from gensim import corpora, models, similarities

In [ ]:
## Model persistence: reload the dictionary and the serialized
## bag-of-words corpus from the files written earlier in the notebook.
##
dictionary = corpora.Dictionary.load('deerwester.dict')
corpus = corpora.MmCorpus('deerwester.mm')

In [ ]:
# Print each bag-of-words vector from the corpus loaded off disk.
# NOTE(review): the bare `corpus` expression below is not the cell's
# last statement, so it produces no output — likely redundant.
corpus
for c in corpus:
    print(c)

In [ ]:
dictionary.token2id

In [ ]:
## Train an LSI (Latent Semantic Indexing) model with 2 latent topics.
## NOTE(review): the model is trained on the raw bag-of-words `corpus`
## here, while a later cell applies it to `corpus_tfidf` — confirm which
## representation was intended for training.
##
lsi = models.LsiModel(corpus, id2word=dictionary, num_topics=2)

In [ ]:
## Print the 2 learned topics as weighted combinations of words
##
lsi.print_topics(2)

In [ ]:
## Fold the corpus into LSI space and print each document's 2-topic
## representation.
## NOTE(review): this uses `corpus_tfidf` from an earlier cell, not the
## `corpus` reloaded from disk above — this cell will fail on a fresh
## Restart & Run All if the earlier tf-idf cells were skipped.
corpus_lsi = lsi[corpus_tfidf]
for doc in corpus_lsi:
    print(doc)

In [ ]:
## Convert a new query document to bag-of-words, then project it into
## LSI space. (`vec_lsi` is reused by the similarity query cells below.)
##
doc = "Human computer interaction"
query_tokens = doc.lower().split()
vec_bow = dictionary.doc2bow(query_tokens)
vec_lsi = lsi[vec_bow]  # project the query into the 2-topic LSI space
print(vec_lsi)

In [ ]:
## Model persistence: store the trained LSI model to disk, then reload it
lsi.save('model.lsi') # same for tfidf, lda, ...
lsi = models.LsiModel.load('model.lsi')

In [ ]:
# Transform the corpus to LSI space and build a dense similarity index
# over it (fine here because the corpus is tiny and fits in memory)

index = similarities.MatrixSimilarity(lsi[corpus])

In [ ]:
## Persist the similarity index to disk and reload it
index.save('deerwester.index')
index = similarities.MatrixSimilarity.load('deerwester.index')

In [ ]:
sims = index[vec_lsi] # perform a similarity query against the corpus

In [ ]:
print(list(enumerate(sims))) # print (document_number, document_similarity) 2-tuples

In [ ]:
# Sort the (document number, similarity score) pairs, most similar first
sims = sorted(enumerate(sims), key=lambda pair: pair[1], reverse=True)
print(sims)  # sorted (document number, similarity score) 2-tuples

In [ ]:
# Re-declare the raw corpus for the lookup cell below.
# NOTE(review): this duplicates the raw_corpus cell near the top of the
# notebook — consider removing one copy so the two cannot drift apart.
raw_corpus = [
    "Human machine interface for lab abc computer applications",
    "A survey of user opinion of computer system response time",
    "The EPS user interface management system",
    "System and human system engineering testing of EPS",
    "Relation of user perceived response time to error measurement",
    "The generation of random binary unordered trees",
    "The intersection graph of paths in trees",
    "Graph minors IV Widths of trees and well quasi ordering",
    "Graph minors A survey",
]

In [ ]:
# Print each (document number, similarity score) pair next to the text
# of the matching document, most similar first.
# Fixed: Python-2 print statement -> print() function; dropped the
# pointless enumerate() whose index was immediately discarded.
for cos in sims:
    print(cos, raw_corpus[cos[0]])

In [ ]: